Remove unnecessary SYMBOL_NAME* macros.
Signed-off-by: Keir Fraser <keir@xensource.com>
not_multiboot_msg:
.asciz "ERR: Not a Multiboot bootloader!"
bad_cpu:
- mov $SYMBOL_NAME(bad_cpu_msg)-__PAGE_OFFSET,%esi
+ mov $bad_cpu_msg-__PAGE_OFFSET,%esi
jmp print_err
not_multiboot:
- mov $SYMBOL_NAME(not_multiboot_msg)-__PAGE_OFFSET,%esi
+ mov $not_multiboot_msg-__PAGE_OFFSET,%esi
print_err:
mov $0xB8000,%edi # VGA framebuffer
1: mov (%esi),%bl
mov $(__HYPERVISOR_CS << 16),%eax
mov %dx,%ax /* selector = 0x0010 = cs */
mov $0x8E00,%dx /* interrupt gate - dpl=0, present */
- lea SYMBOL_NAME(idt_table)-__PAGE_OFFSET,%edi
+ lea idt_table-__PAGE_OFFSET,%edi
mov $256,%ecx
1: mov %eax,(%edi)
mov %edx,4(%edi)
mov %eax,%ds
mov %eax,%es
pushl $int_msg
- call SYMBOL_NAME(printf)
+ call printf
1: jmp 1b
/*** STACK LOCATION ***/
ENTRY(stack_start)
- .long SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200 - __PAGE_OFFSET
+ .long cpu0_stack + STACK_SIZE - 200 - __PAGE_OFFSET
.long __HYPERVISOR_DS
/*** DESCRIPTOR TABLES ***/
-.globl SYMBOL_NAME(idt)
-.globl SYMBOL_NAME(gdt)
+.globl idt
+.globl gdt
ALIGN
.word 0
idt_descr:
.word 256*8-1
-SYMBOL_NAME(idt):
- .long SYMBOL_NAME(idt_table)
+idt:
+ .long idt_table
.word 0
gdt_descr:
.word (LAST_RESERVED_GDT_ENTRY*8)+7
-SYMBOL_NAME(gdt):
- .long SYMBOL_NAME(gdt_table) /* gdt base */
+gdt:
+ .long gdt_table /* gdt base */
.word 0
nopaging_gdt_descr:
.word (LAST_RESERVED_GDT_ENTRY*8)+7
- .long SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
+ .long gdt_table-__PAGE_OFFSET
ALIGN
/* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
.text
.code32
-
+
ENTRY(start)
jmp __start
-
+
.org 0x004
/*** MULTIBOOT HEADER ****/
/* Magic number indicating a Multiboot header. */
/*** DESCRIPTOR TABLES ***/
-.globl SYMBOL_NAME(idt)
-.globl SYMBOL_NAME(gdt)
+.globl idt
+.globl gdt
.org 0x1f0
.word (LAST_RESERVED_GDT_ENTRY*8)+7
.word 0
gdt_descr:
.word (LAST_RESERVED_GDT_ENTRY*8)+7
-SYMBOL_NAME(gdt):
- .quad SYMBOL_NAME(gdt_table)
+gdt:
+ .quad gdt_table
.word 0
idt_descr:
.word 256*16-1
-SYMBOL_NAME(idt):
- .quad SYMBOL_NAME(idt_table)
+idt:
+ .quad idt_table
ENTRY(stack_start)
- .quad SYMBOL_NAME(cpu0_stack) + STACK_SIZE - 200
+ .quad cpu0_stack + STACK_SIZE - 200
high_start:
.quad __high_start
ignore_int:
cld
leaq int_msg(%rip),%rdi
- call SYMBOL_NAME(printf)
+ call printf
1: jmp 1b
#else
.long 0x100200 # gdt_table
#endif
-
-.globl SYMBOL_NAME(trampoline_end)
-SYMBOL_NAME_LABEL(trampoline_end)
+
+ENTRY(trampoline_end)
#endif /* CONFIG_SMP */
ENTRY(vmx_asm_vmexit_handler)
/* selectors are restored/saved by VMX */
VMX_SAVE_ALL_NOSEGREGS
- call SYMBOL_NAME(vmx_vmexit_handler)
+ call vmx_vmexit_handler
jmp vmx_asm_do_resume
ENTRY(vmx_asm_do_launch)
/* VMLAUNCH */
.byte 0x0f,0x01,0xc2
pushf
- call SYMBOL_NAME(vm_launch_fail)
+ call vm_launch_fail
hlt
ALIGN
/*test_softirqs:*/
movl EDOMAIN_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
- test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
+ test %ecx,irq_stat(%eax,1)
jnz vmx_process_softirqs
vmx_restore_all_guest:
- call SYMBOL_NAME(load_cr2)
+ call load_cr2
/*
* Check if we are going back to VMX-based VM
* By this time, all the setups in the VMCS must be complete.
/* VMRESUME */
.byte 0x0f,0x01,0xc3
pushf
- call SYMBOL_NAME(vm_resume_fail)
+ call vm_resume_fail
/* Should never reach here */
hlt
ALIGN
vmx_process_softirqs:
sti
- call SYMBOL_NAME(do_softirq)
+ call do_softirq
jmp vmx_test_all_events
#endif
GET_CURRENT(%ebx)
andl $(NR_hypercalls-1),%eax
PERFC_INCR(PERFC_hypercalls, %eax)
- call *SYMBOL_NAME(hypercall_table)(,%eax,4)
+ call *hypercall_table(,%eax,4)
movl %eax,UREGS_eax(%esp) # save the return value
test_all_events:
/*test_softirqs:*/
movl EDOMAIN_processor(%ebx),%eax
shl $IRQSTAT_shift,%eax
- test %ecx,SYMBOL_NAME(irq_stat)(%eax,1)
+ test %ecx,irq_stat(%eax,1)
jnz process_softirqs
/*test_guest_events:*/
movl EDOMAIN_vcpu_info(%ebx),%eax
ALIGN
process_softirqs:
sti
- call SYMBOL_NAME(do_softirq)
+ call do_softirq
jmp test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */
pushl %edx # push the cpu_user_regs pointer
GET_CURRENT(%ebx)
PERFC_INCR(PERFC_exceptions, %eax)
- call *SYMBOL_NAME(exception_table)(,%eax,4)
+ call *exception_table(,%eax,4)
addl $4,%esp
movl UREGS_eflags(%esp),%eax
movb UREGS_cs(%esp),%al
movl %esp,%edx
pushl %edx # push the cpu_user_regs pointer
pushl %esi # push the trapnr (entry vector)
- call SYMBOL_NAME(fatal_trap)
+ call fatal_trap
ud2
ENTRY(coprocessor_error)
movl %esp,%edx
pushl %ebx # reason
pushl %edx # regs
- call SYMBOL_NAME(do_nmi)
+ call do_nmi
addl $8,%esp
jmp ret_from_intr
andb $0xf,%al
orb $0x4,%al
outb %al,$0x61
- cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
+ cmpb $'i',%ss:opt_nmi # nmi=ignore
je nmi_out
- bts $0,%ss:SYMBOL_NAME(nmi_softirq_reason)
- bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
- cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
+ bts $0,%ss:nmi_softirq_reason
+ bts $NMI_SOFTIRQ,%ss:irq_stat
+ cmpb $'d',%ss:opt_nmi # nmi=dom0
je nmi_out
movl $(__HYPERVISOR_DS),%edx # nmi=fatal
movl %edx,%ds
movl %edx,%es
movl %esp,%edx
push %edx
- call SYMBOL_NAME(mem_parity_error)
+ call mem_parity_error
addl $4,%esp
nmi_out:movl %ss:UREGS_eflags(%esp),%eax
movb %ss:UREGS_cs(%esp),%al
andb $0xf,%al
orb $0x8,%al
outb %al,$0x61
- cmpb $'i',%ss:SYMBOL_NAME(opt_nmi) # nmi=ignore
+ cmpb $'i',%ss:opt_nmi # nmi=ignore
je nmi_out
- bts $1,%ss:SYMBOL_NAME(nmi_softirq_reason)
- bts $NMI_SOFTIRQ,%ss:SYMBOL_NAME(irq_stat)
- cmpb $'d',%ss:SYMBOL_NAME(opt_nmi) # nmi=dom0
+ bts $1,%ss:nmi_softirq_reason
+ bts $NMI_SOFTIRQ,%ss:irq_stat
+ cmpb $'d',%ss:opt_nmi # nmi=dom0
je nmi_out
movl $(__HYPERVISOR_DS),%edx # nmi=fatal
movl %edx,%ds
movl %edx,%es
movl %esp,%edx
push %edx
- call SYMBOL_NAME(io_check_error)
+ call io_check_error
addl $4,%esp
jmp nmi_out
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
movl %eax,UREGS_eax+4(%esp)
- jmp SYMBOL_NAME(do_sched_op)
+ jmp do_sched_op
do_switch_vm86:
# Discard the return address
.data
ENTRY(exception_table)
- .long SYMBOL_NAME(do_divide_error)
- .long SYMBOL_NAME(do_debug)
+ .long do_divide_error
+ .long do_debug
.long 0 # nmi
- .long SYMBOL_NAME(do_int3)
- .long SYMBOL_NAME(do_overflow)
- .long SYMBOL_NAME(do_bounds)
- .long SYMBOL_NAME(do_invalid_op)
- .long SYMBOL_NAME(math_state_restore)
+ .long do_int3
+ .long do_overflow
+ .long do_bounds
+ .long do_invalid_op
+ .long math_state_restore
.long 0 # double fault
- .long SYMBOL_NAME(do_coprocessor_segment_overrun)
- .long SYMBOL_NAME(do_invalid_TSS)
- .long SYMBOL_NAME(do_segment_not_present)
- .long SYMBOL_NAME(do_stack_segment)
- .long SYMBOL_NAME(do_general_protection)
- .long SYMBOL_NAME(do_page_fault)
- .long SYMBOL_NAME(do_spurious_interrupt_bug)
- .long SYMBOL_NAME(do_coprocessor_error)
- .long SYMBOL_NAME(do_alignment_check)
- .long SYMBOL_NAME(do_machine_check)
- .long SYMBOL_NAME(do_simd_coprocessor_error)
+ .long do_coprocessor_segment_overrun
+ .long do_invalid_TSS
+ .long do_segment_not_present
+ .long do_stack_segment
+ .long do_general_protection
+ .long do_page_fault
+ .long do_spurious_interrupt_bug
+ .long do_coprocessor_error
+ .long do_alignment_check
+ .long do_machine_check
+ .long do_simd_coprocessor_error
ENTRY(hypercall_table)
- .long SYMBOL_NAME(do_set_trap_table) /* 0 */
- .long SYMBOL_NAME(do_mmu_update)
- .long SYMBOL_NAME(do_set_gdt)
- .long SYMBOL_NAME(do_stack_switch)
- .long SYMBOL_NAME(do_set_callbacks)
- .long SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
- .long SYMBOL_NAME(do_arch_sched_op)
- .long SYMBOL_NAME(do_dom0_op)
- .long SYMBOL_NAME(do_set_debugreg)
- .long SYMBOL_NAME(do_get_debugreg)
- .long SYMBOL_NAME(do_update_descriptor) /* 10 */
- .long SYMBOL_NAME(do_set_fast_trap)
- .long SYMBOL_NAME(do_dom_mem_op)
- .long SYMBOL_NAME(do_multicall)
- .long SYMBOL_NAME(do_update_va_mapping)
- .long SYMBOL_NAME(do_set_timer_op) /* 15 */
- .long SYMBOL_NAME(do_event_channel_op)
- .long SYMBOL_NAME(do_xen_version)
- .long SYMBOL_NAME(do_console_io)
- .long SYMBOL_NAME(do_physdev_op)
- .long SYMBOL_NAME(do_grant_table_op) /* 20 */
- .long SYMBOL_NAME(do_vm_assist)
- .long SYMBOL_NAME(do_update_va_mapping_otherdomain)
- .long SYMBOL_NAME(do_switch_vm86)
- .long SYMBOL_NAME(do_boot_vcpu)
- .long SYMBOL_NAME(do_ni_hypercall) /* 25 */
- .long SYMBOL_NAME(do_mmuext_op)
+ .long do_set_trap_table /* 0 */
+ .long do_mmu_update
+ .long do_set_gdt
+ .long do_stack_switch
+ .long do_set_callbacks
+ .long do_fpu_taskswitch /* 5 */
+ .long do_arch_sched_op
+ .long do_dom0_op
+ .long do_set_debugreg
+ .long do_get_debugreg
+ .long do_update_descriptor /* 10 */
+ .long do_set_fast_trap
+ .long do_dom_mem_op
+ .long do_multicall
+ .long do_update_va_mapping
+ .long do_set_timer_op /* 15 */
+ .long do_event_channel_op
+ .long do_xen_version
+ .long do_console_io
+ .long do_physdev_op
+ .long do_grant_table_op /* 20 */
+ .long do_vm_assist
+ .long do_update_va_mapping_otherdomain
+ .long do_switch_vm86
+ .long do_boot_vcpu
+ .long do_ni_hypercall /* 25 */
+ .long do_mmuext_op
.rept NR_hypercalls-((.-hypercall_table)/4)
- .long SYMBOL_NAME(do_ni_hypercall)
+ .long do_ni_hypercall
.endr
sti
movq %r10,%rcx
andq $(NR_hypercalls-1),%rax
- leaq SYMBOL_NAME(hypercall_table)(%rip),%r10
+ leaq hypercall_table(%rip),%r10
PERFC_INCR(PERFC_hypercalls, %rax)
callq *(%r10,%rax,8)
movq %rax,UREGS_rax(%rsp) # save the return value
/*test_softirqs:*/
movl EDOMAIN_processor(%rbx),%eax
shl $IRQSTAT_shift,%rax
- leaq SYMBOL_NAME(irq_stat)(%rip),%rcx
+ leaq irq_stat(%rip),%rcx
testl $~0,(%rcx,%rax,1)
jnz process_softirqs
/*test_guest_events:*/
ENTRY(vmx_asm_vmexit_handler)
/* selectors are restored/saved by VMX */
VMX_SAVE_ALL_NOSEGREGS
- call SYMBOL_NAME(vmx_vmexit_handler)
+ call vmx_vmexit_handler
jmp vmx_asm_do_resume
ENTRY(vmx_asm_do_launch)
/* VMLAUNCH */
.byte 0x0f,0x01,0xc2
pushfq
- call SYMBOL_NAME(vm_launch_fail)
+ call vm_launch_fail
hlt
ALIGN
/*test_softirqs:*/
movl EDOMAIN_processor(%rbx),%eax
shl $IRQSTAT_shift,%rax
- leaq SYMBOL_NAME(irq_stat)(%rip), %rdx
+ leaq irq_stat(%rip), %rdx
testl $~0,(%rdx,%rax,1)
jnz vmx_process_softirqs
vmx_restore_all_guest:
- call SYMBOL_NAME(load_cr2)
+ call load_cr2
/*
* Check if we are going back to VMX-based VM
* By this time, all the setups in the VMCS must be complete.
/* VMRESUME */
.byte 0x0f,0x01,0xc3
pushfq
- call SYMBOL_NAME(vm_resume_fail)
+ call vm_resume_fail
/* Should never reach here */
hlt
ALIGN
vmx_process_softirqs:
sti
- call SYMBOL_NAME(do_softirq)
+ call do_softirq
jmp vmx_test_all_events
#endif
/* %rbx: struct exec_domain */
process_softirqs:
sti
- call SYMBOL_NAME(do_softirq)
+ call do_softirq
jmp test_all_events
/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
/* Push new frame at registered guest-OS stack base. */
pushq %rdx
movq %rbx,%rdi
- call SYMBOL_NAME(toggle_guest_mode)
+ call toggle_guest_mode
popq %rdx
movq EDOMAIN_kernel_sp(%rbx),%rsi
jmp 2f
sti
movq %rsp,%rdi
movl UREGS_entry_vector(%rsp),%eax
- leaq SYMBOL_NAME(exception_table)(%rip),%rdx
+ leaq exception_table(%rip),%rdx
GET_CURRENT(%rbx)
PERFC_INCR(PERFC_exceptions, %rax)
callq *(%rdx,%rax,8)
FATAL_exception_with_ints_disabled:
movl UREGS_entry_vector(%rsp),%edi
movq %rsp,%rsi
- call SYMBOL_NAME(fatal_trap)
+ call fatal_trap
ud2
ENTRY(divide_error)
inb $0x61,%al
movl %eax,%esi # reason
movq %rsp,%rdi # regs
- call SYMBOL_NAME(do_nmi)
+ call do_nmi
jmp restore_all_xen
do_arch_sched_op:
# Ensure we return success even if we return via schedule_tail()
xorl %eax,%eax
movq %rax,UREGS_rax+8(%rsp)
- jmp SYMBOL_NAME(do_sched_op)
+ jmp do_sched_op
.data
ENTRY(exception_table)
- .quad SYMBOL_NAME(do_divide_error)
- .quad SYMBOL_NAME(do_debug)
+ .quad do_divide_error
+ .quad do_debug
.quad 0 # nmi
- .quad SYMBOL_NAME(do_int3)
- .quad SYMBOL_NAME(do_overflow)
- .quad SYMBOL_NAME(do_bounds)
- .quad SYMBOL_NAME(do_invalid_op)
- .quad SYMBOL_NAME(math_state_restore)
- .quad SYMBOL_NAME(do_double_fault)
- .quad SYMBOL_NAME(do_coprocessor_segment_overrun)
- .quad SYMBOL_NAME(do_invalid_TSS)
- .quad SYMBOL_NAME(do_segment_not_present)
- .quad SYMBOL_NAME(do_stack_segment)
- .quad SYMBOL_NAME(do_general_protection)
- .quad SYMBOL_NAME(do_page_fault)
- .quad SYMBOL_NAME(do_spurious_interrupt_bug)
- .quad SYMBOL_NAME(do_coprocessor_error)
- .quad SYMBOL_NAME(do_alignment_check)
- .quad SYMBOL_NAME(do_machine_check)
- .quad SYMBOL_NAME(do_simd_coprocessor_error)
+ .quad do_int3
+ .quad do_overflow
+ .quad do_bounds
+ .quad do_invalid_op
+ .quad math_state_restore
+ .quad do_double_fault
+ .quad do_coprocessor_segment_overrun
+ .quad do_invalid_TSS
+ .quad do_segment_not_present
+ .quad do_stack_segment
+ .quad do_general_protection
+ .quad do_page_fault
+ .quad do_spurious_interrupt_bug
+ .quad do_coprocessor_error
+ .quad do_alignment_check
+ .quad do_machine_check
+ .quad do_simd_coprocessor_error
ENTRY(hypercall_table)
- .quad SYMBOL_NAME(do_set_trap_table) /* 0 */
- .quad SYMBOL_NAME(do_mmu_update)
- .quad SYMBOL_NAME(do_set_gdt)
- .quad SYMBOL_NAME(do_stack_switch)
- .quad SYMBOL_NAME(do_set_callbacks)
- .quad SYMBOL_NAME(do_fpu_taskswitch) /* 5 */
- .quad SYMBOL_NAME(do_arch_sched_op)
- .quad SYMBOL_NAME(do_dom0_op)
- .quad SYMBOL_NAME(do_set_debugreg)
- .quad SYMBOL_NAME(do_get_debugreg)
- .quad SYMBOL_NAME(do_update_descriptor) /* 10 */
- .quad SYMBOL_NAME(do_ni_hypercall)
- .quad SYMBOL_NAME(do_dom_mem_op)
- .quad SYMBOL_NAME(do_multicall)
- .quad SYMBOL_NAME(do_update_va_mapping)
- .quad SYMBOL_NAME(do_set_timer_op) /* 15 */
- .quad SYMBOL_NAME(do_event_channel_op)
- .quad SYMBOL_NAME(do_xen_version)
- .quad SYMBOL_NAME(do_console_io)
- .quad SYMBOL_NAME(do_physdev_op)
- .quad SYMBOL_NAME(do_grant_table_op) /* 20 */
- .quad SYMBOL_NAME(do_vm_assist)
- .quad SYMBOL_NAME(do_update_va_mapping_otherdomain)
- .quad SYMBOL_NAME(do_switch_to_user)
- .quad SYMBOL_NAME(do_boot_vcpu)
- .quad SYMBOL_NAME(do_set_segment_base) /* 25 */
- .quad SYMBOL_NAME(do_mmuext_op)
+ .quad do_set_trap_table /* 0 */
+ .quad do_mmu_update
+ .quad do_set_gdt
+ .quad do_stack_switch
+ .quad do_set_callbacks
+ .quad do_fpu_taskswitch /* 5 */
+ .quad do_arch_sched_op
+ .quad do_dom0_op
+ .quad do_set_debugreg
+ .quad do_get_debugreg
+ .quad do_update_descriptor /* 10 */
+ .quad do_ni_hypercall
+ .quad do_dom_mem_op
+ .quad do_multicall
+ .quad do_update_va_mapping
+ .quad do_set_timer_op /* 15 */
+ .quad do_event_channel_op
+ .quad do_xen_version
+ .quad do_console_io
+ .quad do_physdev_op
+ .quad do_grant_table_op /* 20 */
+ .quad do_vm_assist
+ .quad do_update_va_mapping_otherdomain
+ .quad do_switch_to_user
+ .quad do_boot_vcpu
+ .quad do_set_segment_base /* 25 */
+ .quad do_mmuext_op
.rept NR_hypercalls-((.-hypercall_table)/4)
- .quad SYMBOL_NAME(do_ni_hypercall)
+ .quad do_ni_hypercall
.endr
/* Linkage for x86 */
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
-#define SYMBOL_NAME_STR(X) #X
-#define SYMBOL_NAME(X) X
-#define SYMBOL_NAME_LABEL(X) X##:
#ifdef __ASSEMBLY__
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
-#define ENTRY(name) \
- .globl SYMBOL_NAME(name); \
- ALIGN; \
- SYMBOL_NAME_LABEL(name)
+#define ENTRY(name) \
+ .globl name; \
+ ALIGN; \
+ name:
#endif
#define barrier() __asm__ __volatile__("": : :"memory")
/* Maybe auto-generate the following two cases (quoted vs. unquoted). */
#ifndef __ASSEMBLY__
-#define __SAVE_ALL_PRE \
- "cld;" \
- "pushl %eax;" \
- "pushl %ebp;" \
- "pushl %edi;" \
- "pushl %esi;" \
- "pushl %edx;" \
- "pushl %ecx;" \
- "pushl %ebx;" \
- "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \
- "jz 2f;" \
- "call setup_vm86_frame;" \
- "jmp 3f;" \
- "2:testb $3,"STR(UREGS_cs)"(%esp);" \
- "jz 1f;" \
- "movl %ds,"STR(UREGS_ds)"(%esp);" \
- "movl %es,"STR(UREGS_es)"(%esp);" \
- "movl %fs,"STR(UREGS_fs)"(%esp);" \
- "movl %gs,"STR(UREGS_gs)"(%esp);" \
+#define __SAVE_ALL_PRE \
+ "cld;" \
+ "pushl %eax;" \
+ "pushl %ebp;" \
+ "pushl %edi;" \
+ "pushl %esi;" \
+ "pushl %edx;" \
+ "pushl %ecx;" \
+ "pushl %ebx;" \
+ "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \
+ "jz 2f;" \
+ "call setup_vm86_frame;" \
+ "jmp 3f;" \
+ "2:testb $3,"STR(UREGS_cs)"(%esp);" \
+ "jz 1f;" \
+ "movl %ds,"STR(UREGS_ds)"(%esp);" \
+ "movl %es,"STR(UREGS_es)"(%esp);" \
+ "movl %fs,"STR(UREGS_fs)"(%esp);" \
+ "movl %gs,"STR(UREGS_gs)"(%esp);" \
"3:"
-#define SAVE_ALL_NOSEGREGS(_reg) \
- __SAVE_ALL_PRE \
+#define SAVE_ALL_NOSEGREGS(_reg) \
+ __SAVE_ALL_PRE \
"1:"
-#define SET_XEN_SEGMENTS(_reg) \
- "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \
- "movl %e"STR(_reg)"x,%ds;" \
+#define SET_XEN_SEGMENTS(_reg) \
+ "movl $("STR(__HYPERVISOR_DS)"),%e"STR(_reg)"x;" \
+ "movl %e"STR(_reg)"x,%ds;" \
"movl %e"STR(_reg)"x,%es;"
-#define SAVE_ALL(_reg) \
- __SAVE_ALL_PRE \
- SET_XEN_SEGMENTS(_reg) \
+#define SAVE_ALL(_reg) \
+ __SAVE_ALL_PRE \
+ SET_XEN_SEGMENTS(_reg) \
"1:"
#else
-#define __SAVE_ALL_PRE \
- cld; \
- pushl %eax; \
- pushl %ebp; \
- pushl %edi; \
- pushl %esi; \
- pushl %edx; \
- pushl %ecx; \
- pushl %ebx; \
- testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \
- jz 2f; \
- call setup_vm86_frame; \
- jmp 3f; \
- 2:testb $3,UREGS_cs(%esp); \
- jz 1f; \
- movl %ds,UREGS_ds(%esp); \
- movl %es,UREGS_es(%esp); \
- movl %fs,UREGS_fs(%esp); \
- movl %gs,UREGS_gs(%esp); \
+#define __SAVE_ALL_PRE \
+ cld; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \
+ jz 2f; \
+ call setup_vm86_frame; \
+ jmp 3f; \
+ 2:testb $3,UREGS_cs(%esp); \
+ jz 1f; \
+ movl %ds,UREGS_ds(%esp); \
+ movl %es,UREGS_es(%esp); \
+ movl %fs,UREGS_fs(%esp); \
+ movl %gs,UREGS_gs(%esp); \
3:
-#define SAVE_ALL_NOSEGREGS(_reg) \
- __SAVE_ALL_PRE \
+#define SAVE_ALL_NOSEGREGS(_reg) \
+ __SAVE_ALL_PRE \
1:
-#define SET_XEN_SEGMENTS(_reg) \
- movl $(__HYPERVISOR_DS),%e ## _reg ## x; \
- movl %e ## _reg ## x,%ds; \
+#define SET_XEN_SEGMENTS(_reg) \
+ movl $(__HYPERVISOR_DS),%e ## _reg ## x; \
+ movl %e ## _reg ## x,%ds; \
movl %e ## _reg ## x,%es;
-#define SAVE_ALL(_reg) \
- __SAVE_ALL_PRE \
- SET_XEN_SEGMENTS(_reg) \
+#define SAVE_ALL(_reg) \
+ __SAVE_ALL_PRE \
+ SET_XEN_SEGMENTS(_reg) \
1:
#ifdef PERF_COUNTERS
-#define PERFC_INCR(_name,_idx) \
- lock incl SYMBOL_NAME(perfcounters)+_name(,_idx,4)
+#define PERFC_INCR(_name,_idx) \
+ lock incl perfcounters+_name(,_idx,4)
#else
#define PERFC_INCR(_name,_idx)
#endif
#endif
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v)\
-asmlinkage void x(void); \
-__asm__( \
- "\n"__ALIGN_STR"\n" \
- SYMBOL_NAME_STR(x) ":\n\t" \
- "pushl $"#v"<<16\n\t" \
- SAVE_ALL(a) \
- "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
+#define XBUILD_SMP_INTERRUPT(x,v) \
+asmlinkage void x(void); \
+__asm__( \
+ "\n"__ALIGN_STR"\n" \
+ STR(x) ":\n\t" \
+ "pushl $"#v"<<16\n\t" \
+ SAVE_ALL(a) \
+ "call "STR(smp_##x)"\n\t" \
"jmp ret_from_intr\n");
#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
-#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
+#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
asmlinkage void x(struct cpu_user_regs * regs); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(x) ":\n\t" \
- "pushl $"#v"<<16\n\t" \
- SAVE_ALL(a) \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "call "SYMBOL_NAME_STR(smp_##x)"\n\t" \
- "addl $4,%esp\n\t" \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+STR(x) ":\n\t" \
+ "pushl $"#v"<<16\n\t" \
+ SAVE_ALL(a) \
+ "movl %esp,%eax\n\t" \
+ "pushl %eax\n\t" \
+ "call "STR(smp_##x)"\n\t" \
+ "addl $4,%esp\n\t" \
"jmp ret_from_intr\n");
-#define BUILD_COMMON_IRQ() \
-__asm__( \
- "\n" __ALIGN_STR"\n" \
- "common_interrupt:\n\t" \
- SAVE_ALL(a) \
- "movl %esp,%eax\n\t" \
- "pushl %eax\n\t" \
- "call " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
- "addl $4,%esp\n\t" \
+#define BUILD_COMMON_IRQ() \
+__asm__( \
+ "\n" __ALIGN_STR"\n" \
+ "common_interrupt:\n\t" \
+ SAVE_ALL(a) \
+ "movl %esp,%eax\n\t" \
+ "pushl %eax\n\t" \
+ "call " STR(do_IRQ) "\n\t" \
+ "addl $4,%esp\n\t" \
"jmp ret_from_intr\n");
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushl $"#nr"<<16\n\t" \
+#define BUILD_IRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $"#nr"<<16\n\t" \
"jmp common_interrupt");
#endif /* __X86_32_ASM_DEFNS_H__ */
/* Maybe auto-generate the following two cases (quoted vs. unquoted). */
#ifndef __ASSEMBLY__
-#define SAVE_ALL \
- "cld;" \
- "pushq %rdi;" \
- "pushq %rsi;" \
- "pushq %rdx;" \
- "pushq %rcx;" \
- "pushq %rax;" \
- "pushq %r8;" \
- "pushq %r9;" \
- "pushq %r10;" \
- "pushq %r11;" \
- "pushq %rbx;" \
- "pushq %rbp;" \
- "pushq %r12;" \
- "pushq %r13;" \
- "pushq %r14;" \
+#define SAVE_ALL \
+ "cld;" \
+ "pushq %rdi;" \
+ "pushq %rsi;" \
+ "pushq %rdx;" \
+ "pushq %rcx;" \
+ "pushq %rax;" \
+ "pushq %r8;" \
+ "pushq %r9;" \
+ "pushq %r10;" \
+ "pushq %r11;" \
+ "pushq %rbx;" \
+ "pushq %rbp;" \
+ "pushq %r12;" \
+ "pushq %r13;" \
+ "pushq %r14;" \
"pushq %r15;"
-#define RESTORE_ALL \
- "popq %r15;" \
- "popq %r14;" \
- "popq %r13;" \
- "popq %r12;" \
- "popq %rbp;" \
- "popq %rbx;" \
- "popq %r11;" \
- "popq %r10;" \
- "popq %r9;" \
- "popq %r8;" \
- "popq %rax;" \
- "popq %rcx;" \
- "popq %rdx;" \
- "popq %rsi;" \
+#define RESTORE_ALL \
+ "popq %r15;" \
+ "popq %r14;" \
+ "popq %r13;" \
+ "popq %r12;" \
+ "popq %rbp;" \
+ "popq %rbx;" \
+ "popq %r11;" \
+ "popq %r10;" \
+ "popq %r9;" \
+ "popq %r8;" \
+ "popq %rax;" \
+ "popq %rcx;" \
+ "popq %rdx;" \
+ "popq %rsi;" \
"popq %rdi;"
/* Work around AMD erratum #88 */
-#define safe_swapgs \
+#define safe_swapgs \
"mfence; swapgs;"
#else
-#define SAVE_ALL \
- cld; \
- pushq %rdi; \
- pushq %rsi; \
- pushq %rdx; \
- pushq %rcx; \
- pushq %rax; \
- pushq %r8; \
- pushq %r9; \
- pushq %r10; \
- pushq %r11; \
- pushq %rbx; \
- pushq %rbp; \
- pushq %r12; \
- pushq %r13; \
- pushq %r14; \
+#define SAVE_ALL \
+ cld; \
+ pushq %rdi; \
+ pushq %rsi; \
+ pushq %rdx; \
+ pushq %rcx; \
+ pushq %rax; \
+ pushq %r8; \
+ pushq %r9; \
+ pushq %r10; \
+ pushq %r11; \
+ pushq %rbx; \
+ pushq %rbp; \
+ pushq %r12; \
+ pushq %r13; \
+ pushq %r14; \
pushq %r15;
-#define RESTORE_ALL \
- popq %r15; \
- popq %r14; \
- popq %r13; \
- popq %r12; \
- popq %rbp; \
- popq %rbx; \
- popq %r11; \
- popq %r10; \
- popq %r9; \
- popq %r8; \
- popq %rax; \
- popq %rcx; \
- popq %rdx; \
- popq %rsi; \
+#define RESTORE_ALL \
+ popq %r15; \
+ popq %r14; \
+ popq %r13; \
+ popq %r12; \
+ popq %rbp; \
+ popq %rbx; \
+ popq %r11; \
+ popq %r10; \
+ popq %r9; \
+ popq %r8; \
+ popq %rax; \
+ popq %rcx; \
+ popq %rdx; \
+ popq %rsi; \
popq %rdi;
#ifdef PERF_COUNTERS
-#define PERFC_INCR(_name,_idx) \
- pushq %rdx; \
- leaq SYMBOL_NAME(perfcounters)+_name(%rip),%rdx; \
- lock incl (%rdx,_idx,4); \
+#define PERFC_INCR(_name,_idx) \
+ pushq %rdx; \
+ leaq perfcounters+_name(%rip),%rdx; \
+ lock incl (%rdx,_idx,4); \
popq %rdx;
#else
#define PERFC_INCR(_name,_idx)
#endif
#define BUILD_SMP_INTERRUPT(x,v) XBUILD_SMP_INTERRUPT(x,v)
-#define XBUILD_SMP_INTERRUPT(x,v)\
-asmlinkage void x(void); \
-__asm__( \
- "\n"__ALIGN_STR"\n" \
- SYMBOL_NAME_STR(x) ":\n\t" \
- "pushq $0\n\t" \
- "movl $"#v",4(%rsp)\n\t" \
- SAVE_ALL \
- "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
+#define XBUILD_SMP_INTERRUPT(x,v) \
+asmlinkage void x(void); \
+__asm__( \
+ "\n"__ALIGN_STR"\n" \
+ STR(x) ":\n\t" \
+ "pushq $0\n\t" \
+ "movl $"#v",4(%rsp)\n\t" \
+ SAVE_ALL \
+ "callq "STR(smp_##x)"\n\t" \
"jmp ret_from_intr\n");
#define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v)
-#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
+#define XBUILD_SMP_TIMER_INTERRUPT(x,v) \
asmlinkage void x(struct cpu_user_regs * regs); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(x) ":\n\t" \
- "pushq $0\n\t" \
- "movl $"#v",4(%rsp)\n\t" \
- SAVE_ALL \
- "movq %rsp,%rdi\n\t" \
- "callq "SYMBOL_NAME_STR(smp_##x)"\n\t" \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+STR(x) ":\n\t" \
+ "pushq $0\n\t" \
+ "movl $"#v",4(%rsp)\n\t" \
+ SAVE_ALL \
+ "movq %rsp,%rdi\n\t" \
+ "callq "STR(smp_##x)"\n\t" \
"jmp ret_from_intr\n");
-#define BUILD_COMMON_IRQ() \
-__asm__( \
- "\n" __ALIGN_STR"\n" \
- "common_interrupt:\n\t" \
- SAVE_ALL \
- "movq %rsp,%rdi\n\t" \
- "callq " SYMBOL_NAME_STR(do_IRQ) "\n\t" \
+#define BUILD_COMMON_IRQ() \
+__asm__( \
+ "\n" __ALIGN_STR"\n" \
+ "common_interrupt:\n\t" \
+ SAVE_ALL \
+ "movq %rsp,%rdi\n\t" \
+ "callq " STR(do_IRQ) "\n\t" \
"jmp ret_from_intr\n");
#define IRQ_NAME2(nr) nr##_interrupt(void)
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
-#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
-__asm__( \
-"\n"__ALIGN_STR"\n" \
-SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
- "pushq $0\n\t" \
- "movl $"#nr",4(%rsp)\n\t" \
+#define BUILD_IRQ(nr) \
+asmlinkage void IRQ_NAME(nr); \
+__asm__( \
+"\n"__ALIGN_STR"\n" \
+STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushq $0\n\t" \
+ "movl $"#nr",4(%rsp)\n\t" \
"jmp common_interrupt");
#endif /* __X86_64_ASM_DEFNS_H__ */